First, the packages required for the following analyses are loaded and the output options are adjusted.
# Load the helper package pacman: p_load() installs (if necessary) and
# attaches every package listed, so that we don't have to type
# install.packages("PACKAGENAME"); library(PACKAGENAME) for every single
# package.
if (!require("pacman")) install.packages("pacman")
if (!require("devtools")) install.packages("devtools")
# Namespaced call: after a fresh install.packages("devtools") the package is
# NOT attached, so a bare install_github() would fail with
# "could not find function" -- use devtools::install_github().
if (!require("tidystats")) devtools::install_github("willemsleegers/tidystats")
pacman::p_load(tidyverse,
               Routliers,
               psych,
               dplyr,     # already attached via tidyverse; kept for clarity
               car,
               TOSTER)
# We don't want to see messages or warnings in the knitted output.
knitr::opts_chunk$set(message = FALSE,
                      warning = FALSE)
# read.csv2() expects semicolon-separated values with ',' as the decimal
# mark (German-locale export); literal "NA" strings are read as missing.
SPACE02_data <- read.csv2("data/SPACE02_data_wide_adjusted.csv", na = "NA")
Compute the total score across the three different question formats.
# Total score across the three question formats (free recall, open
# questions, transfer questions).
SPACE02_data$total_count <- (SPACE02_data$recall_count + SPACE02_data$open.questions_count + SPACE02_data$transfer.questions_count)
# Encode the two experimental factors as factors for the ANOVA.
SPACE02_data$practice <- as.factor(SPACE02_data$practice)
SPACE02_data$multi <- as.factor(SPACE02_data$multi)
SPACE02_data$total_count <- as.numeric(SPACE02_data$total_count)
# Combined 2x2 cell factor (multi x practice) used for group-wise
# descriptives and planned contrasts; interaction() already returns a
# factor, so the as.factor() call below is a harmless no-op.
SPACE02_data$UVkomb <- interaction(SPACE02_data$multi, SPACE02_data$practice)
SPACE02_data$UVkomb <- as.factor(SPACE02_data$UVkomb)
The result is a variable UVkomb with four factor levels:
1. mono.massed µ1
2. mono.spaced µ2
3. multi.massed µ3
4. multi.spaced µ4
Data exclusion was executed manually within the csv file, with the exception of the outliers.
(The exclusion code below did not work; the reason could not be determined.)
# Exclusion criterion: "participated in the same study" before.
# Remove rows by a column value
# df[!(df$cyl == 6),]
# The comparison string is the German questionnaire answer
# "I have already participated in the SAME study before".
# NOTE(review): if participated.before contains NA, this subsetting keeps
# NA rows -- confirm the column is complete.
SPACE02_data_adjusted <- SPACE02_data[!(SPACE02_data$participated.before == 'Ich habe schon einmal an DERSELBEN Studie teilgenommen.'),]
# 0 dataset removed
# external help "Ja"
# SPACE02_data_adjusted <- # SPACE02_data_adjusted[!(SPACE02_data_adjusted$external.help_s1 == 'yes'),]
# SPACE02_data_adjusted <- # SPACE02_data_adjusted[!(SPACE02_data_adjusted$external.help_s2 == 'yes'),]
# external help
#SPACE02_data_adjusted <- SPACE02_data_adjusted[!(SPACE02_data_adjusted$external.help_post == 'yes'),]
# 9 datasets removed 114, 14, 24, 48, 139, 32, 39, 120, 128
# These rows are to be removed
# drops <- c(114, 14, 24, 48, 139, 32, 39, 120, 128, 36, 43, 87)
# Delete the rows
# SPACE02_data_adjusted <- SPACE02_data_adjusted[-drops,]
# 12 datasets removed manually due to external help before import within the excel csv data - code here didn't work, couldn't find out why...
# MAD-based (median absolute deviation) outlier detection on the total
# score; outliers_mad() reports the acceptable range and flagged cases.
library(Routliers)
res1 <- outliers_mad(x = SPACE02_data_adjusted$total_count)
print(res1)
## Call:
## outliers_mad.default(x = SPACE02_data_adjusted$total_count)
##
## Median:
## [1] 28
##
## MAD:
## [1] 11.8608
##
## Limits of acceptable range of values:
## [1] -7.5824 63.5824
##
## Number of detected outliers
## extremely low extremely high total
## 0 1 1
# Visualize the MAD result and remove the single flagged case.
plot_outliers_mad(res1, x = SPACE02_data_adjusted$total_count)
# Dataset 41 removed (total_recall 67.0 > 63.58)
# These rows are to be removed
drops_outlier <- c(41)
# Delete the rows
# NOTE(review): negative indexing removes the 41st ROW of the current data
# frame, not the case with ID 41 -- confirm that row position and intended
# case coincide after the earlier manual exclusions.
SPACE02_data_adjusted <- SPACE02_data_adjusted[-drops_outlier,]
SPACE02_data_adjusted
Variables:
UV1 (independent variable 1) = multi (mono, multi)
UV2 (independent variable 2) = practice (massed, spaced)
AV1 (dependent variable 1) = recall_count
AV2 (dependent variable 2) = open.questions_count
AV3 (dependent variable 3) = transfer.questions_count
# Tutorial used for describeBy(): https://www.youtube.com/watch?v=ZJB_Ya964tY
# Cell-wise descriptives (n, mean, sd, se, ...) of the free-recall scores
# for the 2(multi) x 2(practice) design; mat = TRUE returns a flat table.
describeBy(recall_count ~ multi + practice, mat=TRUE, data = SPACE02_data_adjusted)
| item | group1 | group2 | vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| X11 | 1 | mono | massed | 1 | 44 | 18.46591 | 6.859383 | 18.75 | 18.40278 | 7.78365 | 4.0 | 39.5 | 35.5 | 0.3034930 | 0.2819421 | 1.0340909 |
| X12 | 2 | multi | massed | 1 | 39 | 20.69231 | 5.293511 | 19.00 | 20.71212 | 6.67170 | 9.5 | 34.0 | 24.5 | 0.1366550 | -0.3883242 | 0.8476401 |
| X13 | 3 | mono | spaced | 1 | 41 | 19.17073 | 6.912497 | 19.00 | 18.86364 | 8.15430 | 5.0 | 38.5 | 33.5 | 0.4226669 | 0.0307775 | 1.0795507 |
| X14 | 4 | multi | spaced | 1 | 40 | 22.28750 | 6.261285 | 22.50 | 22.43750 | 7.41300 | 4.0 | 36.0 | 32.0 | -0.3271377 | 0.2411976 | 0.9899960 |
# Boxplot of recall per design cell, followed by a full column summary of
# the cleaned data set.
boxplot(recall_count ~ multi + practice, data = SPACE02_data_adjusted)
summary(SPACE02_data_adjusted)
## ID condition multi practice gender
## Min. :6155132 Length:164 mono :85 massed:83 Length:164
## 1st Qu.:6217849 Class :character multi:79 spaced:81 Class :character
## Median :6332452 Mode :character Mode :character
## Mean :6312775
## 3rd Qu.:6383423
## Max. :6476953
##
## age language german.since educational.attainment
## Min. :18.00 Length:164 Min. : 7.00 Length:164
## 1st Qu.:25.75 Class :character 1st Qu.:20.00 Class :character
## Median :39.00 Mode :character Median :26.00 Mode :character
## Mean :39.13 Mean :28.71
## 3rd Qu.:51.25 3rd Qu.:30.00
## Max. :77.00 Max. :66.00
## NA's :143
## delay_post_s1 delay_post_s2 delay_s2_s1 cognitive.effort
## Length:164 Length:164 Length:164 Min. :1.000
## Class :character Class :character Class :character 1st Qu.:4.750
## Mode :character Mode :character Mode :character Median :6.000
## Mean :5.817
## 3rd Qu.:7.250
## Max. :9.000
##
## prior.knowledge interest jol_interim jol_final
## Min. : 0.00 Min. : 0.00 Min. : 0.00 Min. : 0.00
## 1st Qu.: 8.00 1st Qu.: 50.00 1st Qu.: 40.00 1st Qu.: 34.00
## Median :20.00 Median : 72.00 Median : 66.00 Median : 55.00
## Mean :23.05 Mean : 66.38 Mean : 59.91 Mean : 53.95
## 3rd Qu.:33.25 3rd Qu.: 84.25 3rd Qu.: 77.25 3rd Qu.: 75.00
## Max. :87.00 Max. :100.00 Max. :100.00 Max. :100.00
##
## pre.test_MC_no pre.test_MC_acc recall_count recall_accuracy
## Min. :0.000 Min. :0.0000 Min. : 4.00 Min. :0.06154
## 1st Qu.:2.000 1st Qu.:0.3333 1st Qu.:16.38 1st Qu.:0.25192
## Median :3.000 Median :0.5000 Median :19.50 Median :0.30000
## Mean :2.957 Mean :0.4929 Mean :20.10 Mean :0.30929
## 3rd Qu.:4.000 3rd Qu.:0.6667 3rd Qu.:25.00 3rd Qu.:0.38462
## Max. :6.000 Max. :1.0000 Max. :39.50 Max. :0.60769
##
## open.questions_count open.questions_acc transfer.questions_count
## Min. : 0.000 Min. :0.0000 Min. : 0.000
## 1st Qu.: 2.500 1st Qu.:0.1923 1st Qu.: 2.000
## Median : 4.500 Median :0.3462 Median : 4.000
## Mean : 4.302 Mean :0.3309 Mean : 4.198
## 3rd Qu.: 6.000 3rd Qu.:0.4615 3rd Qu.: 6.000
## Max. :12.000 Max. :0.9231 Max. :18.000
##
## transfer.questions_acc imi.motiv.mean imi.comp.mean imi.press.mean
## Min. :0.00000 Min. : 3.00 Min. : 4.00 Min. : 3.0
## 1st Qu.:0.05556 1st Qu.: 8.00 1st Qu.:11.00 1st Qu.: 9.0
## Median :0.11111 Median :12.00 Median :15.00 Median :12.0
## Mean :0.11662 Mean :11.68 Mean :14.12 Mean :11.6
## 3rd Qu.:0.16667 3rd Qu.:16.00 3rd Qu.:17.00 3rd Qu.:14.0
## Max. :0.50000 Max. :21.00 Max. :22.00 Max. :20.0
##
## pft_solved.sum pft_acc.of.solved pft_acc disturbed_s1
## Min. : 4.000 Length:164 Length:164 Min. : 0.0000
## 1st Qu.: 7.000 Class :character Class :character 1st Qu.: 0.0000
## Median : 9.000 Mode :character Mode :character Median : 0.0000
## Mean : 8.506 Mean : 0.9817
## 3rd Qu.:10.000 3rd Qu.: 2.0000
## Max. :10.000 Max. :10.0000
##
## concentration_s1 difficulties_s1 difficulties.note_s1 readability_s1
## Min. : 0.000 Length:164 Length:164 Length:164
## 1st Qu.: 7.000 Class :character Class :character Class :character
## Median : 9.000 Mode :character Mode :character Mode :character
## Mean : 8.079
## 3rd Qu.:10.000
## Max. :10.000
##
## external.help_s1 exam_s1 enjoy.reading_s1 reading.per.week_s1
## Length:164 Length:164 Length:164 Min. : 0.00
## Class :character Class :character Class :character 1st Qu.: 6.00
## Mode :character Mode :character Mode :character Median :14.50
## Mean :15.72
## 3rd Qu.:20.25
## Max. :56.00
##
## fsk.reading1_s1 fsk.reading2_s1 fsk.reading3_s1 disturbed_s2
## Length:164 Length:164 Length:164 Min. :0.000
## Class :character Class :character Class :character 1st Qu.:0.000
## Mode :character Mode :character Mode :character Median :0.000
## Mean :1.272
## 3rd Qu.:2.000
## Max. :8.000
## NA's :83
## concentration_s2 difficulties_s2 difficulties.note_s2 readability_s2
## Min. : 0.000 Length:164 Length:164 Length:164
## 1st Qu.: 6.000 Class :character Class :character Class :character
## Median : 9.000 Mode :character Mode :character Mode :character
## Mean : 7.519
## 3rd Qu.:10.000
## Max. :10.000
## NA's :83
## external.help_s2 exam_s2 enjoy.reading_s2 reading.per.week_s2
## Length:164 Length:164 Length:164 Min. : 1.0
## Class :character Class :character Class :character 1st Qu.: 8.0
## Mode :character Mode :character Mode :character Median :14.0
## Mean :15.1
## 3rd Qu.:20.0
## Max. :40.0
## NA's :83
## fsk.reading1_s2 fsk.reading2_s2 fsk.reading3_s2 disturbed_post
## Length:164 Length:164 Length:164 Min. : 0.000
## Class :character Class :character Class :character 1st Qu.: 0.000
## Mode :character Mode :character Mode :character Median : 1.000
## Mean : 1.384
## 3rd Qu.: 2.000
## Max. :10.000
##
## concentration_post difficulties_post difficulties.note_post
## Min. : 0.000 Length:164 Length:164
## 1st Qu.: 6.750 Class :character Class :character
## Median : 9.000 Mode :character Mode :character
## Mean : 7.866
## 3rd Qu.: 9.250
## Max. :10.000
##
## external.help_post participated.before seriousness notes
## Length:164 Length:164 Length:164 Length:164
## Class :character Class :character Class :character Class :character
## Mode :character Mode :character Mode :character Mode :character
##
##
##
##
## total_count UVkomb
## Min. : 4.00 mono.massed :44
## 1st Qu.:21.00 multi.massed:39
## Median :27.75 mono.spaced :41
## Mean :28.60 multi.spaced:40
## 3rd Qu.:36.62
## Max. :63.00
##
# Sample description: gender as factor, age distribution and descriptives.
SPACE02_data_adjusted$gender <- as.factor(SPACE02_data_adjusted$gender)
hist(SPACE02_data_adjusted$age)
describe(SPACE02_data_adjusted$age)
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| X1 | 1 | 164 | 39.12805 | 14.44947 | 39 | 38.40909 | 19.2738 | 18 | 77 | 59 | 0.3029985 | -1.007167 | 1.128314 |
# Cell sizes of the 2x2 design.
table(SPACE02_data_adjusted$multi, SPACE02_data_adjusted$practice)
##
## massed spaced
## mono 44 41
## multi 39 40
Descriptive statistics per design cell (also used for the data-exclusion criteria):
# Cell means and SDs of the total score (context for the exclusion
# criteria).  library() is used instead of require(): require() merely
# returns FALSE when the package is missing instead of failing loudly.
library(dplyr)
group_by(SPACE02_data_adjusted, multi, practice) %>%
  summarise(
    mean = mean(total_count, na.rm = TRUE),
    sd = sd(total_count, na.rm = TRUE)
  )
| multi | practice | mean | sd |
|---|---|---|---|
| mono | massed | 25.88636 | 11.631385 |
| mono | spaced | 26.90244 | 11.751712 |
| multi | massed | 29.41026 | 9.343497 |
| multi | spaced | 32.55000 | 10.853217 |
# Cell means and SDs of the free-recall score.  library() replaces
# require(), which only returns FALSE on failure instead of erroring.
library(dplyr)
group_by(SPACE02_data_adjusted, multi, practice) %>%
  summarise(
    mean = mean(recall_count, na.rm = TRUE),
    sd = sd(recall_count, na.rm = TRUE)
  )
| multi | practice | mean | sd |
|---|---|---|---|
| mono | massed | 18.46591 | 6.859383 |
| mono | spaced | 19.17073 | 6.912497 |
| multi | massed | 20.69231 | 5.293511 |
| multi | spaced | 22.28750 | 6.261285 |
# Cell means and SDs of the open-questions score.  library() replaces
# require(), which only returns FALSE on failure instead of erroring.
library(dplyr)
group_by(SPACE02_data_adjusted, multi, practice) %>%
  summarise(
    mean = mean(open.questions_count, na.rm = TRUE),
    sd = sd(open.questions_count, na.rm = TRUE)
  )
| multi | practice | mean | sd |
|---|---|---|---|
| mono | massed | 3.500000 | 2.345208 |
| mono | spaced | 4.268293 | 2.962468 |
| multi | massed | 4.192308 | 2.432281 |
| multi | spaced | 5.325000 | 2.489851 |
# Cell means and SDs of the transfer-questions score.  library() replaces
# require(), which only returns FALSE on failure instead of erroring.
library(dplyr)
group_by(SPACE02_data_adjusted, multi, practice) %>%
  summarise(
    mean = mean(transfer.questions_count, na.rm = TRUE),
    sd = sd(transfer.questions_count, na.rm = TRUE)
  )
| multi | practice | mean | sd |
|---|---|---|---|
| mono | massed | 3.920454 | 3.211432 |
| mono | spaced | 3.463415 | 2.988290 |
| multi | massed | 4.525641 | 2.709507 |
| multi | spaced | 4.937500 | 2.842416 |
Box plot with multiple groups: plot total performance ("total_count") by group ("practice"), with the boxes coloured by a second grouping variable ("multi").
# if(!require(devtools)) install.packages("devtools")
# devtools::install_github("kassambara/ggpubr")
# install.packages("ggpubr")
# NOTE(review): ggpubr is attached here but the plot below uses base
# boxplot() -- the library() call appears to be unused.
library("ggpubr")
# boxplot with two factor variables multi and practice
boxplot(total_count ~ multi * practice, data=SPACE02_data_adjusted, frame = FALSE,
col = c("#00AFBB", "#E7B800"), ylab="Retention performance", xlab = "Group")
## Standard error
# Cell-wise descriptives again; the `se` column supplies the standard
# errors used for the error-bar plots below.
describeBy(recall_count ~ multi + practice, data = SPACE02_data_adjusted, mat = TRUE)
| item | group1 | group2 | vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| X11 | 1 | mono | massed | 1 | 44 | 18.46591 | 6.859383 | 18.75 | 18.40278 | 7.78365 | 4.0 | 39.5 | 35.5 | 0.3034930 | 0.2819421 | 1.0340909 |
| X12 | 2 | multi | massed | 1 | 39 | 20.69231 | 5.293511 | 19.00 | 20.71212 | 6.67170 | 9.5 | 34.0 | 24.5 | 0.1366550 | -0.3883242 | 0.8476401 |
| X13 | 3 | mono | spaced | 1 | 41 | 19.17073 | 6.912497 | 19.00 | 18.86364 | 8.15430 | 5.0 | 38.5 | 33.5 | 0.4226669 | 0.0307775 | 1.0795507 |
| X14 | 4 | multi | spaced | 1 | 40 | 22.28750 | 6.261285 | 22.50 | 22.43750 | 7.41300 | 4.0 | 36.0 | 32.0 | -0.3271377 | 0.2411976 | 0.9899960 |
# Cell-wise descriptives for the open questions.
describeBy(open.questions_count ~ multi + practice, data = SPACE02_data_adjusted, mat = TRUE)
| item | group1 | group2 | vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| X11 | 1 | mono | massed | 1 | 44 | 3.500000 | 2.345208 | 3.5 | 3.388889 | 2.96520 | 0 | 9.5 | 9.5 | 0.3554814 | -0.6503569 | 0.3535534 |
| X12 | 2 | multi | massed | 1 | 39 | 4.192308 | 2.432281 | 4.5 | 4.166667 | 2.22390 | 0 | 9.0 | 9.0 | 0.0644189 | -0.8599603 | 0.3894767 |
| X13 | 3 | mono | spaced | 1 | 41 | 4.268293 | 2.962468 | 4.0 | 4.060606 | 2.96520 | 0 | 12.0 | 12.0 | 0.5107148 | -0.2871451 | 0.4626598 |
| X14 | 4 | multi | spaced | 1 | 40 | 5.325000 | 2.489851 | 5.0 | 5.281250 | 2.59455 | 0 | 10.5 | 10.5 | 0.1083482 | -0.3234491 | 0.3936800 |
# Cell-wise descriptives for the transfer questions.
describeBy(transfer.questions_count ~ multi + practice, data = SPACE02_data_adjusted, mat = TRUE)
| item | group1 | group2 | vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| X11 | 1 | mono | massed | 1 | 44 | 3.920454 | 3.211432 | 3.50 | 3.611111 | 2.96520 | 0 | 18.0 | 18.0 | 1.8717678 | 6.0021612 | 0.4841415 |
| X12 | 2 | multi | massed | 1 | 39 | 4.525641 | 2.709507 | 4.00 | 4.378788 | 2.22390 | 0 | 13.0 | 13.0 | 0.7828171 | 0.5675449 | 0.4338684 |
| X13 | 3 | mono | spaced | 1 | 41 | 3.463415 | 2.988290 | 3.50 | 3.151515 | 2.22390 | 0 | 17.5 | 17.5 | 2.4176136 | 9.3081127 | 0.4666925 |
| X14 | 4 | multi | spaced | 1 | 40 | 4.937500 | 2.842416 | 4.25 | 4.859375 | 2.59455 | 0 | 13.0 | 13.0 | 0.3926822 | -0.1946393 | 0.4494254 |
# SPACE02_subset <- SPACE02_data_adjusted[,c("age", "multi", "recall_count", "open.questions_count", "transfer.questions_count")]
# SPACE02_subset_descr <- describeBy(SPACE02_subset, multi, skew = FALSE)
# print(SPACE02_subset_descr)
Are the dependent variables metric? Yes (Likert-type scales).
Levene-Test for homogeneity of variances
From the output above we can see that the p-value is not less than the significance level of 0.05.
This means that there is no evidence to suggest that the variance across groups is statistically significantly different.
Therefore, we can assume the homogeneity of variances in the different treatment groups.
Plot, normality:
As all the points fall approximately along this reference line, we can assume normality.
Shapiro-Wilk-Test, normality:
For anova1, the Shapiro-Wilk test on the ANOVA residuals gives W = 0.98383, p = 0.04721; note that this p-value is slightly below 0.05, so the test in fact indicates a mild deviation from normality, in contrast to the visual impression from the Q-Q plot.
For anova2 with W = 0.98014 and p = 0.01601 (below 0.05), the data significantly deviate from a normal distribution.
# Two-way ANOVAs for the three dependent variables.  The formula
# `multi * practice` expands to the main effects plus the interaction
# (multi + practice + multi:practice), so the fitted models are identical
# to the original's redundant spelled-out formula.
AV1anova <- aov(recall_count ~ multi * practice, data = SPACE02_data_adjusted)
AV2anova <- aov(open.questions_count ~ multi * practice, data = SPACE02_data_adjusted)
AV3anova <- aov(transfer.questions_count ~ multi * practice, data = SPACE02_data_adjusted)
# ANOVA table for free recall.
summary(AV1anova)
## Df Sum Sq Mean Sq F value Pr(>F)
## multi 1 297 297.19 7.284 0.00771 **
## practice 1 53 52.68 1.291 0.25753
## multi:practice 1 8 8.11 0.199 0.65633
## Residuals 160 6528 40.80
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# ANOVA table for the open questions.
summary(AV2anova)
## Df Sum Sq Mean Sq F value Pr(>F)
## multi 1 32.8 32.82 4.981 0.0270 *
## practice 1 36.5 36.50 5.541 0.0198 *
## multi:practice 1 1.4 1.36 0.206 0.6504
## Residuals 160 1054.1 6.59
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# ANOVA table for the transfer questions.
summary(AV3anova)
## Df Sum Sq Mean Sq F value Pr(>F)
## multi 1 43.8 43.79 5.024 0.0264 *
## practice 1 0.1 0.06 0.007 0.9340
## multi:practice 1 7.7 7.72 0.886 0.3480
## Residuals 160 1394.7 8.72
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
library(DescTools)
# Effect sizes (eta^2 and partial eta^2, type-2 sums of squares) for the
# recall ANOVA.
EtaSq(AV1anova, type = 2, anova = FALSE)
## eta.sq eta.sq.part
## multi 0.042266260 0.042681076
## practice 0.007650407 0.008005322
## multi:practice 0.001177617 0.001240651
# Effect sizes for the open-questions ANOVA.
EtaSq(AV2anova, type = 2, anova = FALSE)
## eta.sq eta.sq.part
## multi 0.027701738 0.028710444
## practice 0.032453855 0.033470751
## multi:practice 0.001207597 0.001286907
# Effect sizes for the transfer-questions ANOVA.
EtaSq(AV3anova, type = 2, anova = FALSE)
## eta.sq eta.sq.part
## multi 3.031456e-02 3.047746e-02
## practice 4.144346e-05 4.297412e-05
## multi:practice 5.339770e-03 5.506732e-03
library(apaTables)
# APA-style table of cell means/SDs with 95% CIs for free recall.
apa.2way.table(
multi, practice, recall_count, SPACE02_data_adjusted, filename = NA, table.number = 3, show.conf.interval = TRUE, show.marginal.means = FALSE, landscape = TRUE)
##
##
## Table 3
##
## Means and standard deviations for recall_count as a function of a 2(multi) X 2(practice) design
##
## M M_95%_CI SD
## practice:massed
## multi
## mono 18.47 [16.38, 20.55] 6.86
## multi 20.69 [18.98, 22.41] 5.29
##
## practice:spaced
## multi
## mono 19.17 [16.99, 21.35] 6.91
## multi 22.29 [20.29, 24.29] 6.26
##
## Note. M and SD represent mean and standard deviation, respectively.
## LL and UL indicate the lower and upper limits of the
## 95% confidence interval for the mean, respectively.
## The confidence interval is a plausible range of population means
## that could have created a sample mean (Cumming, 2014).
# APA-style table of cell means/SDs with 95% CIs for the open questions.
apa.2way.table(
multi, practice, open.questions_count, SPACE02_data_adjusted, filename = NA, table.number = 3, show.conf.interval = TRUE, show.marginal.means = FALSE, landscape = TRUE)
##
##
## Table 3
##
## Means and standard deviations for open.questions_count as a function of a 2(multi) X 2(practice) design
##
## M M_95%_CI SD
## practice:massed
## multi
## mono 3.50 [2.79, 4.21] 2.35
## multi 4.19 [3.40, 4.98] 2.43
##
## practice:spaced
## multi
## mono 4.27 [3.33, 5.20] 2.96
## multi 5.33 [4.53, 6.12] 2.49
##
## Note. M and SD represent mean and standard deviation, respectively.
## LL and UL indicate the lower and upper limits of the
## 95% confidence interval for the mean, respectively.
## The confidence interval is a plausible range of population means
## that could have created a sample mean (Cumming, 2014).
# APA-style table of cell means/SDs with 95% CIs for the transfer questions.
apa.2way.table(
multi, practice, transfer.questions_count, SPACE02_data_adjusted, filename = NA, table.number = 3, show.conf.interval = TRUE, show.marginal.means = FALSE, landscape = TRUE)
##
##
## Table 3
##
## Means and standard deviations for transfer.questions_count as a function of a 2(multi) X 2(practice) design
##
## M M_95%_CI SD
## practice:massed
## multi
## mono 3.92 [2.94, 4.90] 3.21
## multi 4.53 [3.65, 5.40] 2.71
##
## practice:spaced
## multi
## mono 3.46 [2.52, 4.41] 2.99
## multi 4.94 [4.03, 5.85] 2.84
##
## Note. M and SD represent mean and standard deviation, respectively.
## LL and UL indicate the lower and upper limits of the
## 95% confidence interval for the mean, respectively.
## The confidence interval is a plausible range of population means
## that could have created a sample mean (Cumming, 2014).
library(car)
# Levene test (homogeneity of variances) across the four cells for recall.
leveneTest(recall_count ~ multi*practice, data= SPACE02_data_adjusted)
| Df | F value | Pr(>F) | |
|---|---|---|---|
| group | 3 | 0.820964 | 0.4840765 |
| 160 | NA | NA |
# Levene test for the open questions.
leveneTest(open.questions_count ~ multi*practice, data= SPACE02_data_adjusted)
| Df | F value | Pr(>F) | |
|---|---|---|---|
| group | 3 | 0.9657767 | 0.4104437 |
| 160 | NA | NA |
# Levene test for the transfer questions.
leveneTest(transfer.questions_count ~ multi*practice, data= SPACE02_data_adjusted)
| Df | F value | Pr(>F) | |
|---|---|---|---|
| group | 3 | 0.324261 | 0.8078188 |
| 160 | NA | NA |
# p-value not significant ---> the null hypothesis is not rejected (it
# states: homogeneity of variances holds)
# the null hypothesis must be retained when p > 0.05
# Q-Q plots (plot type 2) of the residuals of each ANOVA to inspect the
# normality assumption; the commented summary() calls are kept for
# quick reference.
plot(AV1anova, 2)
# summary(AV1anova)
plot(AV2anova, 2)
# summary(AV2anova)
plot(AV3anova, 2)
# summary(AV3anova)
# Interaction plot: mean recall per material (mono/multi), one trace per
# practice schedule (massed/spaced).
interaction.plot(SPACE02_data_adjusted$multi, SPACE02_data_adjusted$practice, SPACE02_data_adjusted$recall_count,
main="Interaction diagram free recall question",
ylim = c(18, 23),
lwd=2,
ylab = "Mean",
xlab = "Material",
trace.label = "Practice",
type = "b",
col=c("blue","black"),
pch = c(17,19),
fixed = TRUE)
# Same interaction plots for the open and transfer questions.
interaction.plot(SPACE02_data_adjusted$multi, SPACE02_data_adjusted$practice, SPACE02_data_adjusted$open.questions_count,
main="Interaction diagram open questions", col=c("red", "blue", "green"), lwd=2, ylab = "Mean", xlab = "Material", trace.label = "Practice")
interaction.plot(SPACE02_data_adjusted$multi, SPACE02_data_adjusted$practice, SPACE02_data_adjusted$transfer.questions_count,
main="Interaction diagram transfer questions", col=c("red", "blue", "green"), lwd=2, ylab = "Mean", xlab = "Material", trace.label = "Practice")
# Boxplots of the three dependent variables by experimental condition.
# ggplot(data, aes(...)) is equivalent to data %>% ggplot(aes(...)).
ggplot(SPACE02_data_adjusted, aes(x = condition, y = recall_count)) +
  geom_boxplot() +
  theme_bw()
ggplot(SPACE02_data_adjusted, aes(x = condition, y = open.questions_count)) +
  geom_boxplot() +
  theme_bw()
ggplot(SPACE02_data_adjusted, aes(x = condition, y = transfer.questions_count)) +
  geom_boxplot() +
  theme_bw()
# Interaction plot with error bars for the free-recall scores.
# T/F are reassignable shortcuts in R; spell out TRUE/FALSE.
error.bars.by(recall_count ~ practice + multi + practice*multi, data = SPACE02_data_adjusted,
ylab = "Retention performance",
eyes = FALSE,
within = FALSE,
main = "Interaction diagram free recall question",
xlab="Material",
v.lab = c("Mono", "Multi"),
lty = c(2,2),
lines = TRUE,
ylim = c(15, 25))
# NOTE(review): lty = FALSE is an unusual value for legend() -- confirm
# the legend renders as intended without line samples.
legend("topleft",
c("Spaced", "Massed"),
lty = FALSE)
# Use <- for top-level assignment (tidyverse style).  This palette vector
# is not referenced by the boxplots below; kept for potential reuse.
colors <- c("blue","black")
# Boxplots of each dependent variable per design cell.
boxplot(recall_count ~ practice + multi, data = SPACE02_data_adjusted,
ann = TRUE,
main = "Free recall question",
xlab = "Group",
ylab = "Retention performance")
boxplot(open.questions_count ~ practice + multi, data = SPACE02_data_adjusted,
ann = TRUE,
main = "Open questions",
xlab = "Group",
ylab = "Retention performance")
boxplot(transfer.questions_count ~ practice + multi, data = SPACE02_data_adjusted,
ann = TRUE,
main = "Transfer questions",
xlab = "Group",
ylab = "Retention performance")
# Interaction plot with error bars for the open questions (TRUE/FALSE
# spelled out instead of the reassignable shortcuts T/F).
error.bars.by(open.questions_count ~ practice + multi + practice*multi, data = SPACE02_data_adjusted, eyes = FALSE, within = FALSE, main = "Interaction diagram", xlab="Multi", v.lab = c("Multi", "Mono"), lty = c(1,2), ylim = c(2, 6.5))
legend("topleft", c("Massed", "Spaced"), lty = c(1,2))
boxplot(open.questions_count ~ practice + multi, data = SPACE02_data_adjusted)
# Same for the transfer questions.
error.bars.by(transfer.questions_count ~ practice + multi + practice*multi, data = SPACE02_data_adjusted, eyes = FALSE, within = FALSE, main = "Interaction diagram", xlab="Multi", v.lab = c("Multi", "Mono"), lty = c(1,2), ylim = c(2.5, 6))
legend("topleft", c("Massed", "Spaced"), lty = c(1,2))
boxplot(transfer.questions_count ~ practice + multi, data = SPACE02_data_adjusted)
Source:
https://stats.stackexchange.com/questions/190427/contrasts-in-anova-in-r
# NOTE(review): the lsmeans package appears to have been superseded by
# emmeans; lsmeans still works but is no longer actively developed --
# consider migrating.
library(lsmeans)
# Estimated marginal means for all four design cells, free recall
# (the surrounding parentheses print the result).
(lsmAV1 <- lsmeans(AV1anova, ~ multi + practice))
## multi practice lsmean SE df lower.CL upper.CL
## mono massed 18.5 0.963 160 16.6 20.4
## multi massed 20.7 1.023 160 18.7 22.7
## mono spaced 19.2 0.998 160 17.2 21.1
## multi spaced 22.3 1.010 160 20.3 24.3
##
## Confidence level used: 0.95
# Estimated marginal means for the open questions.
(lsmAV2 <- lsmeans(AV2anova, ~ multi + practice))
## multi practice lsmean SE df lower.CL upper.CL
## mono massed 3.50 0.387 160 2.74 4.26
## multi massed 4.19 0.411 160 3.38 5.00
## mono spaced 4.27 0.401 160 3.48 5.06
## multi spaced 5.33 0.406 160 4.52 6.13
##
## Confidence level used: 0.95
# Estimated marginal means for the transfer questions.
(lsmAV3 <- lsmeans(AV3anova, ~ multi + practice))
## multi practice lsmean SE df lower.CL upper.CL
## mono massed 3.92 0.445 160 3.04 4.80
## multi massed 4.53 0.473 160 3.59 5.46
## mono spaced 3.46 0.461 160 2.55 4.37
## multi spaced 4.94 0.467 160 4.02 5.86
##
## Confidence level used: 0.95
# Planned contrasts; coefficient order follows the lsmeans cell order
# printed above: (mono.massed, multi.massed, mono.spaced, multi.spaced).
contr1.1 <- list("Spacing alone" = c(-1, 0, 1, 0)) # mono.spaced - mono.massed
contr2.1 <- list("Multimedia alone" = c(-1, 1, 0, 0)) # multi.massed - mono.massed
contr3.1 <- list("Interaction-Spacing" = c(0, 0, -1, 1)) # multi.spaced - mono.spaced
contr3.2 <- list("Interaction-Multimedia" = c(0, -1, 0, 1)) # multi.spaced - multi.massed
# Planned contrasts on the free-recall means (lsmAV1).
contrast(lsmAV1, contr1.1)
## contrast estimate SE df t.ratio p.value
## Spacing alone 0.705 1.39 160 0.508 0.6119
contrast(lsmAV1, contr2.1)
## contrast estimate SE df t.ratio p.value
## Multimedia alone 2.23 1.4 160 1.585 0.1150
contrast(lsmAV1, contr3.1)
## contrast estimate SE df t.ratio p.value
## Interaction-Spacing 3.12 1.42 160 2.196 0.0296
contrast(lsmAV1, contr3.2)
## contrast estimate SE df t.ratio p.value
## Interaction-Multimedia 1.6 1.44 160 1.110 0.2688
# Planned contrasts on the open-questions means (lsmAV2).
contrast(lsmAV2, contr1.1)
## contrast estimate SE df t.ratio p.value
## Spacing alone 0.768 0.557 160 1.379 0.1698
contrast(lsmAV2, contr2.1)
## contrast estimate SE df t.ratio p.value
## Multimedia alone 0.692 0.565 160 1.226 0.2219
contrast(lsmAV2, contr3.1)
## contrast estimate SE df t.ratio p.value
## Interaction-Spacing 1.06 0.57 160 1.852 0.0658
contrast(lsmAV2, contr3.2)
## contrast estimate SE df t.ratio p.value
## Interaction-Multimedia 1.13 0.578 160 1.961 0.0516
# Planned contrasts on the transfer-questions means (lsmAV3).
contrast(lsmAV3, contr1.1)
## contrast estimate SE df t.ratio p.value
## Spacing alone -0.457 0.641 160 -0.713 0.4768
contrast(lsmAV3, contr2.1)
## contrast estimate SE df t.ratio p.value
## Multimedia alone 0.605 0.649 160 0.932 0.3527
contrast(lsmAV3, contr3.1)
## contrast estimate SE df t.ratio p.value
## Interaction-Spacing 1.47 0.656 160 2.247 0.0260
contrast(lsmAV3, contr3.2)
## contrast estimate SE df t.ratio p.value
## Interaction-Multimedia 0.412 0.664 160 0.620 0.5362
For convenience, the descriptive statistics for the relevant variables are shown first for each hypothesis/dependent-variable combination.
library(dplyr)
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# BUG FIX: in the original, `na.rm = TRUE` was passed to summarise() itself,
# which only created a constant column named `na.rm`; to actually drop
# missing values it must be an argument of mean()/sd().
summarise(by_UVkomb,
          mean_recall_count = mean(recall_count, na.rm = TRUE),
          sd_recall_count = sd(recall_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_recall_count | sd_recall_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 18.46591 | 6.859383 | 44 | TRUE |
| multi.massed | 20.69231 | 5.293511 | 39 | TRUE |
| mono.spaced | 19.17073 | 6.912497 | 41 | TRUE |
| multi.spaced | 22.28750 | 6.261285 | 40 | TRUE |
massed/text-spaced/text for recall_count
library(TOSTER)
# Equivalence test (TOST): mono.massed vs. mono.spaced on recall_count,
# using the cell statistics summarised above.
# NOTE(review): sd2 = 6.912498 differs in the last digit from the table
# value 6.912497 -- confirm which rounding is correct.
# NOTE(review): TOSTtwo() appears to be deprecated in recent TOSTER
# releases in favour of tsum_TOST(); it still runs but may warn.
TOSTtwo(m1 = 18.46591, m2 = 19.17073, sd1 = 6.859383, sd2 = 6.912498, n1 = 44, n2 = 41, low_eqbound_d = -0.25, high_eqbound_d = 0.25, alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: 0.68 p-value lower bound: 0.249
## t-value upper bound: -1.62 p-value upper bound: 0.054
## degrees of freedom : 82.48
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.25
## high eqbound: 0.25
##
## Equivalence bounds (raw scores):
## low eqbound: -1.7215
## high eqbound: 1.7215
##
## TOST confidence interval:
## lower bound 90% CI: -3.192
## upper bound 90% CI: 1.782
##
## NHST confidence interval:
## lower bound 95% CI: -3.678
## upper bound 95% CI: 2.269
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(82.48) = 0.680, p = 0.249, given equivalence bounds of -1.721 and 1.721 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was non-significant, t(82.48) = -0.471, p = 0.639, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically not different from zero and statistically not equivalent to zero.
library(dplyr)
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# BUG FIX: `na.rm = TRUE` must be an argument of mean()/sd(); passing it to
# summarise() only adds a constant `na.rm` column, as in the original.
summarise(by_UVkomb,
          mean_open.questions_count = mean(open.questions_count, na.rm = TRUE),
          sd_open.questions_count = sd(open.questions_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_open.questions_count | sd_open.questions_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 3.500000 | 2.345208 | 44 | TRUE |
| multi.massed | 4.192308 | 2.432281 | 39 | TRUE |
| mono.spaced | 4.268293 | 2.962468 | 41 | TRUE |
| multi.spaced | 5.325000 | 2.489851 | 40 | TRUE |
massed/text-spaced/text for open questions
library(TOSTER)
# Equivalence test (TOST): mono.massed vs. mono.spaced on the open
# questions, with the cell statistics from the table above.
TOSTtwo(m1 = 3.500000, m2 = 4.268293, sd1 = 2.345208, sd2 = 2.962468, n1 = 44, n2 = 41, low_eqbound_d = -0.25, high_eqbound_d = 0.25, alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: -0.172 p-value lower bound: 0.568
## t-value upper bound: -2.47 p-value upper bound: 0.008
## degrees of freedom : 76.19
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.25
## high eqbound: 0.25
##
## Equivalence bounds (raw scores):
## low eqbound: -0.6679
## high eqbound: 0.6679
##
## TOST confidence interval:
## lower bound 90% CI: -1.738
## upper bound 90% CI: 0.201
##
## NHST confidence interval:
## lower bound 95% CI: -1.928
## upper bound 95% CI: 0.391
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(76.19) = -0.172, p = 0.568, given equivalence bounds of -0.668 and 0.668 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was non-significant, t(76.19) = -1.319, p = 0.191, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically not different from zero and statistically not equivalent to zero.
library(dplyr)
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# BUG FIX: `na.rm = TRUE` must be an argument of mean()/sd(); passing it to
# summarise() only adds a constant `na.rm` column, as in the original.
summarise(by_UVkomb,
          mean_transfer.questions_count = mean(transfer.questions_count, na.rm = TRUE),
          sd_transfer.questions_count = sd(transfer.questions_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_transfer.questions_count | sd_transfer.questions_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 3.920454 | 3.211432 | 44 | TRUE |
| multi.massed | 4.525641 | 2.709507 | 39 | TRUE |
| mono.spaced | 3.463415 | 2.988290 | 41 | TRUE |
| multi.spaced | 4.937500 | 2.842416 | 40 | TRUE |
massed/text-spaced/text for transfer questions
library(TOSTER)
# Equivalence test (TOST): mono.massed vs. mono.spaced on the TRANSFER
# questions (cf. the table above); note the narrower bounds d = +/-0.2.
# NOTE(review): m1 = 3.920455 vs. table value 3.920454 -- last-digit
# rounding difference, confirm against the summarised output.
TOSTtwo(m1 = 3.920455, m2 = 3.463415, sd1 = 3.211432, sd2 = 2.988290, n1 = 44, n2 = 41, low_eqbound_d = -0.2, high_eqbound_d = 0.2, alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: 1.60 p-value lower bound: 0.056
## t-value upper bound: -0.243 p-value upper bound: 0.404
## degrees of freedom : 83
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.2
## high eqbound: 0.2
##
## Equivalence bounds (raw scores):
## low eqbound: -0.6204
## high eqbound: 0.6204
##
## TOST confidence interval:
## lower bound 90% CI: -0.662
## upper bound 90% CI: 1.576
##
## NHST confidence interval:
## lower bound 95% CI: -0.88
## upper bound 95% CI: 1.795
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(83) = -0.243, p = 0.404, given equivalence bounds of -0.620 and 0.620 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was non-significant, t(83) = 0.680, p = 0.499, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically not different from zero and statistically not equivalent to zero.
library(dplyr)
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# BUG FIX: `na.rm = TRUE` must be an argument of mean()/sd(); passing it to
# summarise() only adds a constant `na.rm` column, as in the original.
summarise(by_UVkomb,
          mean_recall_count = mean(recall_count, na.rm = TRUE),
          sd_recall_count = sd(recall_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_recall_count | sd_recall_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 18.46591 | 6.859383 | 44 | TRUE |
| multi.massed | 20.69231 | 5.293511 | 39 | TRUE |
| mono.spaced | 19.17073 | 6.912497 | 41 | TRUE |
| multi.spaced | 22.28750 | 6.261285 | 40 | TRUE |
massed/text-massed/multimedia for recall_count
library(TOSTER)
# Equivalence test (TOST, Welch t) on recall scores:
# mono.massed vs multi.massed, equivalence bounds d = +/- 0.25, alpha .05.
mean_mono  <- 18.46591; sd_mono  <- 6.859383; n_mono  <- 44  # mono.massed
mean_multi <- 20.69231; sd_multi <- 5.293511; n_multi <- 39  # multi.massed
TOSTtwo(m1 = mean_mono, m2 = mean_multi,
        sd1 = sd_mono, sd2 = sd_multi,
        n1 = n_mono, n2 = n_multi,
        low_eqbound_d = -0.25, high_eqbound_d = 0.25,
        alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: -0.52 p-value lower bound: 0.698
## t-value upper bound: -2.81 p-value upper bound: 0.003
## degrees of freedom : 79.56
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.25
## high eqbound: 0.25
##
## Equivalence bounds (raw scores):
## low eqbound: -1.5317
## high eqbound: 1.5317
##
## TOST confidence interval:
## lower bound 90% CI: -4.452
## upper bound 90% CI: -0.001
##
## NHST confidence interval:
## lower bound 95% CI: -4.888
## upper bound 95% CI: 0.435
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(79.56) = -0.520, p = 0.698, given equivalence bounds of -1.532 and 1.532 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was non-significant, t(79.56) = -1.665, p = 0.0998, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically not different from zero and statistically not equivalent to zero.
library(dplyr)
# Group the adjusted data by the combined factor (multi x practice).
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# Group means/SDs of open-question scores per condition.
# Fix: na.rm moved inside mean()/sd(); as a summarise() argument it only
# produced a literal "na.rm" column and did not remove NAs.
summarise(by_UVkomb,
          mean_open.questions_count = mean(open.questions_count, na.rm = TRUE),
          sd_open.questions_count = sd(open.questions_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_open.questions_count | sd_open.questions_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 3.500000 | 2.345208 | 44 | TRUE |
| multi.massed | 4.192308 | 2.432281 | 39 | TRUE |
| mono.spaced | 4.268293 | 2.962468 | 41 | TRUE |
| multi.spaced | 5.325000 | 2.489851 | 40 | TRUE |
massed/text-massed/multimedia for open questions
library(TOSTER)
# Equivalence test (TOST, Welch t) on open questions:
# mono.massed vs multi.massed, equivalence bounds d = +/- 0.25, alpha .05.
mean_mono  <- 3.500000; sd_mono  <- 2.345208; n_mono  <- 44  # mono.massed
mean_multi <- 4.192308; sd_multi <- 2.432281; n_multi <- 39  # multi.massed
TOSTtwo(m1 = mean_mono, m2 = mean_multi,
        sd1 = sd_mono, sd2 = sd_multi,
        n1 = n_mono, n2 = n_multi,
        low_eqbound_d = -0.25, high_eqbound_d = 0.25,
        alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: -0.181 p-value lower bound: 0.571
## t-value upper bound: -2.45 p-value upper bound: 0.008
## degrees of freedom : 79.01
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.25
## high eqbound: 0.25
##
## Equivalence bounds (raw scores):
## low eqbound: -0.5973
## high eqbound: 0.5973
##
## TOST confidence interval:
## lower bound 90% CI: -1.568
## upper bound 90% CI: 0.183
##
## NHST confidence interval:
## lower bound 95% CI: -1.739
## upper bound 95% CI: 0.355
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(79.01) = -0.181, p = 0.571, given equivalence bounds of -0.597 and 0.597 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was non-significant, t(79.01) = -1.316, p = 0.192, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically not different from zero and statistically not equivalent to zero.
library(dplyr)
# Group the adjusted data by the combined factor (multi x practice).
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# Group means/SDs of transfer-question scores per condition.
# Fix: na.rm moved inside mean()/sd(); as a summarise() argument it only
# produced a literal "na.rm" column and did not remove NAs.
summarise(by_UVkomb,
          mean_transfer.questions_count = mean(transfer.questions_count, na.rm = TRUE),
          sd_transfer.questions_count = sd(transfer.questions_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_transfer.questions_count | sd_transfer.questions_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 3.920454 | 3.211432 | 44 | TRUE |
| multi.massed | 4.525641 | 2.709507 | 39 | TRUE |
| mono.spaced | 3.463415 | 2.988290 | 41 | TRUE |
| multi.spaced | 4.937500 | 2.842416 | 40 | TRUE |
massed/text-massed/multimedia for transfer questions
library(TOSTER)
# Equivalence test (TOST, Welch t) on transfer questions:
# mono.massed vs multi.massed, equivalence bounds d = +/- 0.2, alpha .05.
# NOTE(review): m1 = 3.920455 here but the table above prints 3.920454 —
# confirm which rounding of the group mean is intended.
mean_mono  <- 3.920455; sd_mono  <- 3.211432; n_mono  <- 44  # mono.massed
mean_multi <- 4.525641; sd_multi <- 2.709507; n_multi <- 39  # multi.massed
TOSTtwo(m1 = mean_mono, m2 = mean_multi,
        sd1 = sd_mono, sd2 = sd_multi,
        n1 = n_mono, n2 = n_multi,
        low_eqbound_d = -0.2, high_eqbound_d = 0.2,
        alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: -0.0169 p-value lower bound: 0.507
## t-value upper bound: -1.84 p-value upper bound: 0.034
## degrees of freedom : 80.82
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.2
## high eqbound: 0.2
##
## Equivalence bounds (raw scores):
## low eqbound: -0.5942
## high eqbound: 0.5942
##
## TOST confidence interval:
## lower bound 90% CI: -1.687
## upper bound 90% CI: 0.477
##
## NHST confidence interval:
## lower bound 95% CI: -1.899
## upper bound 95% CI: 0.688
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(80.82) = -0.0169, p = 0.507, given equivalence bounds of -0.594 and 0.594 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was non-significant, t(80.82) = -0.931, p = 0.355, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically not different from zero and statistically not equivalent to zero.
library(dplyr)
# Group the adjusted data by the combined factor (multi x practice).
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# Group means/SDs of recall scores per condition.
# Fix: na.rm moved inside mean()/sd(); as a summarise() argument it only
# produced a literal "na.rm" column and did not remove NAs.
summarise(by_UVkomb,
          mean_recall_count = mean(recall_count, na.rm = TRUE),
          sd_recall_count = sd(recall_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_recall_count | sd_recall_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 18.46591 | 6.859383 | 44 | TRUE |
| multi.massed | 20.69231 | 5.293511 | 39 | TRUE |
| mono.spaced | 19.17073 | 6.912497 | 41 | TRUE |
| multi.spaced | 22.28750 | 6.261285 | 40 | TRUE |
spaced/multimedia-spaced/text for recall_count
library(TOSTER)
# Equivalence test (TOST, Welch t) on recall scores:
# multi.spaced vs mono.spaced, equivalence bounds d = +/- 0.25, alpha .05.
# NOTE(review): sd2 = 6.912498 here but the table above prints 6.912497 —
# confirm the hand-copied value.
mean_ms <- 22.28750; sd_ms <- 6.261285; n_ms <- 40  # multi.spaced
mean_mo <- 19.17073; sd_mo <- 6.912498; n_mo <- 41  # mono.spaced
TOSTtwo(m1 = mean_ms, m2 = mean_mo,
        sd1 = sd_ms, sd2 = sd_mo,
        n1 = n_ms, n2 = n_mo,
        low_eqbound_d = -0.25, high_eqbound_d = 0.25,
        alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: 3.25 p-value lower bound: 0.0008
## t-value upper bound: 1.00 p-value upper bound: 0.840
## degrees of freedom : 78.57
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.25
## high eqbound: 0.25
##
## Equivalence bounds (raw scores):
## low eqbound: -1.6487
## high eqbound: 1.6487
##
## TOST confidence interval:
## lower bound 90% CI: 0.679
## upper bound 90% CI: 5.555
##
## NHST confidence interval:
## lower bound 95% CI: 0.201
## upper bound 95% CI: 6.033
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(78.57) = 1.002, p = 0.840, given equivalence bounds of -1.649 and 1.649 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was significant, t(78.57) = 2.128, p = 0.0365, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically different from zero and statistically not equivalent to zero.
library(dplyr)
# Group the adjusted data by the combined factor (multi x practice).
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# Group means/SDs of open-question scores per condition.
# Fix: na.rm moved inside mean()/sd(); as a summarise() argument it only
# produced a literal "na.rm" column and did not remove NAs.
summarise(by_UVkomb,
          mean_open.questions_count = mean(open.questions_count, na.rm = TRUE),
          sd_open.questions_count = sd(open.questions_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_open.questions_count | sd_open.questions_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 3.500000 | 2.345208 | 44 | TRUE |
| multi.massed | 4.192308 | 2.432281 | 39 | TRUE |
| mono.spaced | 4.268293 | 2.962468 | 41 | TRUE |
| multi.spaced | 5.325000 | 2.489851 | 40 | TRUE |
spaced/multimedia-spaced/text for open questions
library(TOSTER)
# Equivalence test (TOST, Welch t) on open questions:
# multi.spaced vs mono.spaced, equivalence bounds d = +/- 0.25, alpha .05.
mean_ms <- 5.325000; sd_ms <- 2.489851; n_ms <- 40  # multi.spaced
mean_mo <- 4.268293; sd_mo <- 2.962468; n_mo <- 41  # mono.spaced
TOSTtwo(m1 = mean_ms, m2 = mean_mo,
        sd1 = sd_ms, sd2 = sd_mo,
        n1 = n_ms, n2 = n_mo,
        low_eqbound_d = -0.25, high_eqbound_d = 0.25,
        alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: 2.87 p-value lower bound: 0.003
## t-value upper bound: 0.613 p-value upper bound: 0.729
## degrees of freedom : 77.32
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.25
## high eqbound: 0.25
##
## Equivalence bounds (raw scores):
## low eqbound: -0.6841
## high eqbound: 0.6841
##
## TOST confidence interval:
## lower bound 90% CI: 0.045
## upper bound 90% CI: 2.068
##
## NHST confidence interval:
## lower bound 95% CI: -0.153
## upper bound 95% CI: 2.266
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(77.32) = 0.613, p = 0.729, given equivalence bounds of -0.684 and 0.684 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was non-significant, t(77.32) = 1.739, p = 0.0859, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically not different from zero and statistically not equivalent to zero.
library(dplyr)
# Group the adjusted data by the combined factor (multi x practice).
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# Group means/SDs of transfer-question scores per condition.
# Fix: na.rm moved inside mean()/sd(); as a summarise() argument it only
# produced a literal "na.rm" column and did not remove NAs.
summarise(by_UVkomb,
          mean_transfer.questions_count = mean(transfer.questions_count, na.rm = TRUE),
          sd_transfer.questions_count = sd(transfer.questions_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_transfer.questions_count | sd_transfer.questions_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 3.920454 | 3.211432 | 44 | TRUE |
| multi.massed | 4.525641 | 2.709507 | 39 | TRUE |
| mono.spaced | 3.463415 | 2.988290 | 41 | TRUE |
| multi.spaced | 4.937500 | 2.842416 | 40 | TRUE |
spaced/multimedia-spaced/text for transfer questions
library(TOSTER)
# Equivalence test (TOST, Welch t) on transfer questions:
# multi.spaced vs mono.spaced, equivalence bounds d = +/- 0.2, alpha .05.
mean_ms <- 4.937500; sd_ms <- 2.842416; n_ms <- 40  # multi.spaced
mean_mo <- 3.463415; sd_mo <- 2.988290; n_mo <- 41  # mono.spaced
TOSTtwo(m1 = mean_ms, m2 = mean_mo,
        sd1 = sd_ms, sd2 = sd_mo,
        n1 = n_ms, n2 = n_mo,
        low_eqbound_d = -0.2, high_eqbound_d = 0.2,
        alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: 3.18 p-value lower bound: 0.001
## t-value upper bound: 1.37 p-value upper bound: 0.913
## degrees of freedom : 78.95
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.2
## high eqbound: 0.2
##
## Equivalence bounds (raw scores):
## low eqbound: -0.5833
## high eqbound: 0.5833
##
## TOST confidence interval:
## lower bound 90% CI: 0.396
## upper bound 90% CI: 2.552
##
## NHST confidence interval:
## lower bound 95% CI: 0.184
## upper bound 95% CI: 2.764
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(78.95) = 1.375, p = 0.913, given equivalence bounds of -0.583 and 0.583 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was significant, t(78.95) = 2.275, p = 0.0256, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically different from zero and statistically not equivalent to zero.
library(dplyr)
# Group the adjusted data by the combined factor (multi x practice).
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# Group means/SDs of recall scores per condition.
# Fix: na.rm moved inside mean()/sd(); as a summarise() argument it only
# produced a literal "na.rm" column and did not remove NAs.
summarise(by_UVkomb,
          mean_recall_count = mean(recall_count, na.rm = TRUE),
          sd_recall_count = sd(recall_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_recall_count | sd_recall_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 18.46591 | 6.859383 | 44 | TRUE |
| multi.massed | 20.69231 | 5.293511 | 39 | TRUE |
| mono.spaced | 19.17073 | 6.912497 | 41 | TRUE |
| multi.spaced | 22.28750 | 6.261285 | 40 | TRUE |
library(TOSTER)
# spaced/multimedia - massed/multimedia for recall_count
# (label added: this comparison was missing the heading every other one has)
# Equivalence test (TOST, Welch t) on recall scores:
# multi.spaced vs multi.massed, equivalence bounds d = +/- 0.2, alpha .05.
mean_sp <- 22.28750; sd_sp <- 6.261285; n_sp <- 40  # multi.spaced
mean_ma <- 20.69231; sd_ma <- 5.293511; n_ma <- 39  # multi.massed
TOSTtwo(m1 = mean_sp, m2 = mean_ma,
        sd1 = sd_sp, sd2 = sd_ma,
        n1 = n_sp, n2 = n_ma,
        low_eqbound_d = -0.2, high_eqbound_d = 0.2,
        alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: 2.11 p-value lower bound: 0.019
## t-value upper bound: 0.334 p-value upper bound: 0.630
## degrees of freedom : 75.5
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.2
## high eqbound: 0.2
##
## Equivalence bounds (raw scores):
## low eqbound: -1.1595
## high eqbound: 1.1595
##
## TOST confidence interval:
## lower bound 90% CI: -0.575
## upper bound 90% CI: 3.766
##
## NHST confidence interval:
## lower bound 95% CI: -1.001
## upper bound 95% CI: 4.191
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(75.5) = 0.334, p = 0.630, given equivalence bounds of -1.160 and 1.160 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was non-significant, t(75.5) = 1.224, p = 0.225, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically not different from zero and statistically not equivalent to zero.
library(dplyr)
# Group the adjusted data by the combined factor (multi x practice).
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# Group means/SDs of open-question scores per condition.
# Fix: na.rm moved inside mean()/sd(); as a summarise() argument it only
# produced a literal "na.rm" column and did not remove NAs.
summarise(by_UVkomb,
          mean_open.questions_count = mean(open.questions_count, na.rm = TRUE),
          sd_open.questions_count = sd(open.questions_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_open.questions_count | sd_open.questions_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 3.500000 | 2.345208 | 44 | TRUE |
| multi.massed | 4.192308 | 2.432281 | 39 | TRUE |
| mono.spaced | 4.268293 | 2.962468 | 41 | TRUE |
| multi.spaced | 5.325000 | 2.489851 | 40 | TRUE |
spaced/multimedia-massed/multimedia for open questions
library(TOSTER)
# Equivalence test (TOST, Welch t) on open questions:
# multi.spaced vs multi.massed, equivalence bounds d = +/- 0.2, alpha .05.
mean_sp <- 5.325000; sd_sp <- 2.489851; n_sp <- 40  # multi.spaced
mean_ma <- 4.192308; sd_ma <- 2.432281; n_ma <- 39  # multi.massed
TOSTtwo(m1 = mean_sp, m2 = mean_ma,
        sd1 = sd_sp, sd2 = sd_ma,
        n1 = n_sp, n2 = n_ma,
        low_eqbound_d = -0.2, high_eqbound_d = 0.2,
        alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: 2.93 p-value lower bound: 0.002
## t-value upper bound: 1.16 p-value upper bound: 0.874
## degrees of freedom : 77
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.2
## high eqbound: 0.2
##
## Equivalence bounds (raw scores):
## low eqbound: -0.4922
## high eqbound: 0.4922
##
## TOST confidence interval:
## lower bound 90% CI: 0.211
## upper bound 90% CI: 2.055
##
## NHST confidence interval:
## lower bound 95% CI: 0.03
## upper bound 95% CI: 2.235
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(77) = 1.156, p = 0.874, given equivalence bounds of -0.492 and 0.492 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was significant, t(77) = 2.045, p = 0.0442, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically different from zero and statistically not equivalent to zero.
library(dplyr)
# Group the adjusted data by the combined factor (multi x practice).
by_UVkomb <- group_by(SPACE02_data_adjusted, UVkomb)
# Group means/SDs of transfer-question scores per condition.
# Fix: na.rm moved inside mean()/sd(); as a summarise() argument it only
# produced a literal "na.rm" column and did not remove NAs.
summarise(by_UVkomb,
          mean_transfer.questions_count = mean(transfer.questions_count, na.rm = TRUE),
          sd_transfer.questions_count = sd(transfer.questions_count, na.rm = TRUE),
          n = n())
| UVkomb | mean_transfer.questions_count | sd_transfer.questions_count | n | na.rm |
|---|---|---|---|---|
| mono.massed | 3.920454 | 3.211432 | 44 | TRUE |
| multi.massed | 4.525641 | 2.709507 | 39 | TRUE |
| mono.spaced | 3.463415 | 2.988290 | 41 | TRUE |
| multi.spaced | 4.937500 | 2.842416 | 40 | TRUE |
spaced/multimedia-massed/multimedia for transfer questions
library(TOSTER)
# Equivalence test (TOST, Welch t) on transfer questions:
# multi.spaced vs multi.massed, equivalence bounds d = +/- 0.2, alpha .05.
mean_sp <- 4.937500; sd_sp <- 2.842416; n_sp <- 40  # multi.spaced
mean_ma <- 4.525641; sd_ma <- 2.709507; n_ma <- 39  # multi.massed
TOSTtwo(m1 = mean_sp, m2 = mean_ma,
        sd1 = sd_sp, sd2 = sd_ma,
        n1 = n_sp, n2 = n_ma,
        low_eqbound_d = -0.2, high_eqbound_d = 0.2,
        alpha = 0.05, var.equal = FALSE)
## TOST results:
## t-value lower bound: 1.55 p-value lower bound: 0.063
## t-value upper bound: -0.23 p-value upper bound: 0.409
## degrees of freedom : 76.96
##
## Equivalence bounds (Cohen's d):
## low eqbound: -0.2
## high eqbound: 0.2
##
## Equivalence bounds (raw scores):
## low eqbound: -0.5554
## high eqbound: 0.5554
##
## TOST confidence interval:
## lower bound 90% CI: -0.628
## upper bound 90% CI: 1.452
##
## NHST confidence interval:
## lower bound 95% CI: -0.832
## upper bound 95% CI: 1.656
##
## Equivalence Test Result:
## The equivalence test was non-significant, t(76.96) = -0.230, p = 0.409, given equivalence bounds of -0.555 and 0.555 (on a raw scale) and an alpha of 0.05.
## Null Hypothesis Test Result:
## The null hypothesis test was non-significant, t(76.96) = 0.659, p = 0.512, given an alpha of 0.05.
## Based on the equivalence test and the null-hypothesis test combined, we can conclude that the observed effect is statistically not different from zero and statistically not equivalent to zero.